if ( alert_counter[cpu] == 5*nmi_hz )
{
console_force_unlock();
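+ /* We are about to die; lock-region checks would only obscure the real bug. */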
+ disable_criticalregion_checking();
die("NMI Watchdog detected LOCKUP on CPU", regs, cpu);
}
}
perfc_incrc(page_faults);
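+ /* A page fault must never be taken inside a lock-protected critical region. */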
+ ASSERT_no_criticalregion();
+
if ( unlikely(addr >= LDT_VIRT_START) &&
(addr < (LDT_VIRT_START + (d->mm.ldt_ents*LDT_ENTRY_SIZE))) )
{
trap_info_t *ti;
unsigned long fixup;
+ ASSERT_no_criticalregion();
+
/* Badness if error in ring 0, or result of an interrupt. */
if ( !(regs->xcs & 3) || (error_code & 1) )
goto gp_in_kernel;
asmlinkage void mem_parity_error(struct pt_regs *regs)
{
console_force_unlock();
+ disable_criticalregion_checking();
printk("\n\n");
asmlinkage void io_check_error(struct pt_regs *regs)
{
console_force_unlock();
+ disable_criticalregion_checking();
printk("\n\n");
/* No-op hypercall. */
return -ENOSYS;
}
+
+/*
+ * Lock debugging
+ */
+
+#ifndef NDEBUG
+
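+/* Per-CPU count of locks currently held: nonzero => in a critical region. */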
+static int crit_count[NR_CPUS];
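+/* Cleared when we decide to crash, so error paths may take locks freely. */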
+static int crit_checking = 1;
+
+void disable_criticalregion_checking(void)
+{
+ crit_checking = 0;
+}
+
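+/* Called on every lock acquisition; pairs with criticalregion_exit(). */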
+void criticalregion_enter(void)
+{
+ int cpu = smp_processor_id();
+ ASSERT(crit_count[cpu] >= 0);
+ crit_count[cpu]++;
+}
+
+void criticalregion_exit(void)
+{
+ int cpu = smp_processor_id();
+ crit_count[cpu]--;
+ ASSERT(crit_count[cpu] >= 0);
+}
+
+void ASSERT_no_criticalregion(void)
+{
+ int cpu = smp_processor_id();
+ if ( (crit_count[cpu] == 0) || !crit_checking )
+ return;
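+ /* Disable checking first: the failing ASSERTs below may themselves take locks. */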
+ disable_criticalregion_checking();
+ ASSERT(crit_count[cpu] >= 0); /* -ve count is a special kind of bogus! */
+ ASSERT(crit_count[cpu] == 0); /* count != 0 here, so this must fire... */
+ ASSERT(0); /* ...and we should NEVER get here. */
+}
+
+#endif /* !NDEBUG */
#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
-static inline void spin_lock(spinlock_t *lock)
+static inline void _raw_spin_lock(spinlock_t *lock)
{
__asm__ __volatile__ (
"1: lock; decb %0 \n"
: "=m" (lock->lock) : : "memory" );
}
-static inline void spin_unlock(spinlock_t *lock)
+static inline void _raw_spin_unlock(spinlock_t *lock)
{
#if !defined(CONFIG_X86_OOSTORE)
ASSERT(spin_is_locked(lock));
#endif
}
-static inline int spin_trylock(spinlock_t *lock)
+static inline int _raw_spin_trylock(spinlock_t *lock)
{
char oldval;
__asm__ __volatile__(
* are any critical regions that cannot form part of such a set, they can use
* standard spin_[un]lock().
*/
-#define spin_lock_recursive(_lock) \
+#define _raw_spin_lock_recursive(_lock) \
do { \
int cpu = smp_processor_id(); \
if ( likely((_lock)->recurse_cpu != cpu) ) \
(_lock)->recurse_cnt++; \
} while ( 0 )
-#define spin_unlock_recursive(_lock) \
+#define _raw_spin_unlock_recursive(_lock) \
do { \
if ( likely(--(_lock)->recurse_cnt == 0) ) \
{ \
* On x86, we implement read-write locks as a 32-bit counter
* with the high bit (sign) being the "contended" bit.
*/
-static inline void read_lock(rwlock_t *rw)
+static inline void _raw_read_lock(rwlock_t *rw)
{
__build_read_lock(rw, "__read_lock_failed");
}
-static inline void write_lock(rwlock_t *rw)
+static inline void _raw_write_lock(rwlock_t *rw)
{
__build_write_lock(rw, "__write_lock_failed");
}
-#define read_unlock(rw) \
+#define _raw_read_unlock(rw) \
__asm__ __volatile__ ( \
"lock ; incl %0" : \
"=m" ((rw)->lock) : : "memory" )
-#define write_unlock(rw) \
+#define _raw_write_unlock(rw) \
__asm__ __volatile__ ( \
"lock ; addl $" RW_LOCK_BIAS_STR ",%0" : \
"=m" ((rw)->lock) : : "memory" )
-static inline int write_trylock(rwlock_t *lock)
-{
- atomic_t *count = (atomic_t *)lock;
- if ( atomic_sub_and_test(RW_LOCK_BIAS, count) )
- return 1;
- atomic_add(RW_LOCK_BIAS, count);
- return 0;
-}
-
#endif /* __ASM_SPINLOCK_H */
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+ ASSERT_no_criticalregion(); \
__asm__ __volatile__( \
"1: mov"itype" %"rtype"1,%2\n" \
"2:\n" \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+ ASSERT_no_criticalregion(); \
__asm__ __volatile__( \
"1: mov"itype" %2,%"rtype"1\n" \
"2:\n" \
static always_inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
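+ /* May fault on guest memory, and the page-fault path asserts no locks held. */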
+ ASSERT_no_criticalregion();
if (__builtin_constant_p(n)) {
unsigned long ret;
static always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ ASSERT_no_criticalregion();
if (__builtin_constant_p(n)) {
unsigned long ret;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif
-#define spin_lock_init(lock) do { } while(0)
-#define spin_lock(lock) (void)(lock) /* Not "unused variable". */
-#define spin_is_locked(lock) (0)
-#define spin_trylock(lock) ({1; })
-#define spin_unlock_wait(lock) do { } while(0)
-#define spin_unlock(lock) do { } while(0)
-#define spin_lock_recursive(lock) do { } while(0)
-#define spin_unlock_recursive(lock) do { } while(0)
+#define spin_lock_init(lock) do { } while(0)
+#define spin_is_locked(lock) (0)
+#define _raw_spin_lock(lock) (void)(lock)
+#define _raw_spin_trylock(lock) ({1; })
+#define _raw_spin_unlock(lock) do { } while(0)
+#define _raw_spin_lock_recursive(lock) do { } while(0)
+#define _raw_spin_unlock_recursive(lock) do { } while(0)
#if (__GNUC__ > 2)
typedef struct { } rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif
-#define rwlock_init(lock) do { } while(0)
-#define read_lock(lock) (void)(lock) /* Not "unused variable". */
-#define read_unlock(lock) do { } while(0)
-#define write_lock(lock) (void)(lock) /* Not "unused variable". */
-#define write_unlock(lock) do { } while(0)
+#define rwlock_init(lock) do { } while(0)
+#define _raw_read_lock(lock) (void)(lock) /* Not "unused variable". */
+#define _raw_read_unlock(lock) do { } while(0)
+#define _raw_write_lock(lock) (void)(lock) /* Not "unused variable". */
+#define _raw_write_unlock(lock) do { } while(0)
+
+#endif
+
+#ifndef NDEBUG
+
+extern void criticalregion_enter(void);
+extern void criticalregion_exit(void);
+extern void ASSERT_no_criticalregion(void);
+extern void disable_criticalregion_checking(void);
+
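+/*
+ * Debug wrappers: each lock/unlock adjusts a per-CPU critical-region count
+ * so that code which may fault or block can call ASSERT_no_criticalregion().
+ * A sketch of the bug class this catches (hypothetical names):
+ *     spin_lock(&some_lock);
+ *     __copy_from_user(buf, uaddr, len);  <- may fault: assertion fires
+ *     spin_unlock(&some_lock);
+ */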
+#define spin_lock(_lock) \
+ do { criticalregion_enter(); _raw_spin_lock(_lock); } while (0)
+#define spin_unlock(_lock) \
+ do { _raw_spin_unlock(_lock); criticalregion_exit(); } while (0)
+#define spin_lock_recursive(_lock) \
+ do { criticalregion_enter(); _raw_spin_lock_recursive(_lock); } while (0)
+#define spin_unlock_recursive(_lock) \
+ do { _raw_spin_unlock_recursive(_lock); criticalregion_exit(); } while (0)
+#define read_lock(_lock) \
+ do { criticalregion_enter(); _raw_read_lock(_lock); } while (0)
+#define read_unlock(_lock) \
+ do { _raw_read_unlock(_lock); criticalregion_exit(); } while (0)
+#define write_lock(_lock) \
+ do { criticalregion_enter(); _raw_write_lock(_lock); } while (0)
+#define write_unlock(_lock) \
+ do { _raw_write_unlock(_lock); criticalregion_exit(); } while (0)
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+ criticalregion_enter();
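+ /* If the lock is not acquired, back out of the critical region. */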
+ if ( !_raw_spin_trylock(lock) )
+ {
+ criticalregion_exit();
+ return 0;
+ }
+ return 1;
+}
+
+#else
+
+#define ASSERT_no_criticalregion() ((void)0)
+#define disable_criticalregion_checking() ((void)0)
+
+#define spin_lock(_lock) _raw_spin_lock(_lock)
+#define spin_trylock(_lock) _raw_spin_trylock(_lock)
+#define spin_unlock(_lock) _raw_spin_unlock(_lock)
+#define spin_lock_recursive(_lock) _raw_spin_lock_recursive(_lock)
+#define spin_unlock_recursive(_lock) _raw_spin_unlock_recursive(_lock)
+#define read_lock(_lock) _raw_read_lock(_lock)
+#define read_unlock(_lock) _raw_read_unlock(_lock)
+#define write_lock(_lock) _raw_write_lock(_lock)
+#define write_unlock(_lock) _raw_write_unlock(_lock)
#endif